return exact ? NULL : *q;
}
+/*
+ * Look up a cpupool by poolid and return it with cpupool_ctl_lock held.
+ * On success the caller owns the lock and must release it via
+ * cpupool_put(). Returns NULL (lock already dropped) if no pool with
+ * this id exists.
+ */
+struct cpupool *cpupool_get_by_id(int poolid)
+{
+ struct cpupool *c;
+ /* cpupool_ctl_lock protects against concurrent pool destruction */
+ spin_lock(&cpupool_ctl_lock);
+ c = cpupool_find_by_id(poolid, 1); /* 1: exact-id match (presumably the 'exact' flag -- confirm against cpupool_find_by_id) */
+ if ( c == NULL )
+ spin_unlock(&cpupool_ctl_lock);
+ return c;
+}
+
+/*
+ * Release the "reference" taken by cpupool_get_by_id(). The pool
+ * argument is deliberately unused: the reference is the global
+ * cpupool_ctl_lock itself, not a per-pool refcount, so releasing it
+ * just drops that lock.
+ */
+void cpupool_put(struct cpupool *pool)
+{
+ spin_unlock(&cpupool_ctl_lock);
+}
+
/*
* create a new cpupool with specified poolid and scheduler
* returns pointer to new cpupool structure if okay, NULL else
* - poolid already used
* - unknown scheduler
*/
-struct cpupool *cpupool_create(int poolid, char *sched)
+/*
+ * On failure *perr holds a -errno value: -ENOMEM (allocation failed),
+ * -EEXIST (poolid already in use), or the error from scheduler_alloc().
+ * On success *perr is 0 and the new pool is returned.
+ */
+static struct cpupool *cpupool_create(
+ int poolid, unsigned int sched_id, int *perr)
{
struct cpupool *c;
struct cpupool **q;
int last = 0;
+ /* Pessimistic default: the first failure paths below are allocations. */
+ *perr = -ENOMEM;
if ( (c = alloc_cpupool_struct()) == NULL )
return NULL;
memset(c, 0, sizeof(*c));
- cpupool_dprintk("cpupool_create(pool=%d,sched=%s)\n", poolid, sched);
+ cpupool_dprintk("cpupool_create(pool=%d,sched=%u)\n", poolid, sched_id);
+
spin_lock(&cpupool_lock);
+
for_each_cpupool(q)
{
last = (*q)->cpupool_id;
{
spin_unlock(&cpupool_lock);
free_cpupool_struct(c);
+ /* Requested poolid collides with an existing pool. */
+ *perr = -EEXIST;
return NULL;
}
c->next = *q;
}
- *q = c;
+
c->cpupool_id = (poolid == CPUPOOLID_NONE) ? (last + 1) : poolid;
- if ( (c->sched = scheduler_alloc(sched)) == NULL )
+ /* Pool 0 reuses the default scheduler instead of allocating its own. */
+ if ( poolid == 0 )
{
- spin_unlock(&cpupool_lock);
- cpupool_destroy(c);
- return NULL;
+ c->sched = scheduler_get_default();
+ }
+ else
+ {
+ c->sched = scheduler_alloc(sched_id, perr);
+ if ( c->sched == NULL )
+ {
+ spin_unlock(&cpupool_lock);
+ free_cpupool_struct(c);
+ return NULL;
+ }
}
+
+ /* Only link the pool into the global list once fully constructed. */
+ *q = c;
+
spin_unlock(&cpupool_lock);
cpupool_dprintk("Created cpupool %d with scheduler %s (%s)\n",
c->cpupool_id, c->sched->name, c->sched->opt_name);
+ *perr = 0;
return c;
}
/*
* - cpus still assigned to pool
* - pool not in list
*/
-int cpupool_destroy(struct cpupool *c)
+static int cpupool_destroy(struct cpupool *c)
{
struct cpupool **q;
for_each_cpupool(q)
if ( *q == c )
break;
- if ( (*q != c) || (c->n_dom != 0) || cpus_weight(c->cpu_valid) )
+ if ( *q != c )
{
spin_unlock(&cpupool_lock);
- return 1;
+ return -ENOENT;
+ }
+ if ( (c->n_dom != 0) || cpus_weight(c->cpu_valid) )
+ {
+ spin_unlock(&cpupool_lock);
+ return -EBUSY;
}
*q = c->next;
spin_unlock(&cpupool_lock);
case XEN_SYSCTL_CPUPOOL_OP_CREATE:
{
int poolid;
- const struct scheduler *sched;
poolid = (op->cpupool_id == XEN_SYSCTL_CPUPOOL_PAR_ANY) ?
CPUPOOLID_NONE: op->cpupool_id;
- sched = scheduler_get_by_id(op->sched_id);
- ret = -ENOENT;
- if ( sched == NULL )
- break;
- ret = 0;
- c = cpupool_create(poolid, sched->opt_name);
- if ( c == NULL )
- ret = -EINVAL;
- else
+ c = cpupool_create(poolid, op->sched_id, &ret);
+ if ( c != NULL )
op->cpupool_id = c->cpupool_id;
}
break;
ret = -ENOENT;
if ( c == NULL )
break;
- ret = (cpupool_destroy(c) != 0) ? -EBUSY : 0;
+ ret = cpupool_destroy(c);
}
break;
static int __init cpupool_presmp_init(void)
{
+ int err;
void *cpu = (void *)(long)smp_processor_id();
- cpupool0 = cpupool_create(0, NULL);
+ cpupool0 = cpupool_create(0, 0, &err);
BUG_ON(cpupool0 == NULL);
cpu_callback(&cpu_nfb, CPU_ONLINE, cpu);
register_cpu_notifier(&cpu_nfb);
long sched_adjust_global(struct xen_sysctl_scheduler_op *op)
{
- const struct scheduler *sched;
-
- sched = scheduler_get_by_id(op->sched_id);
- if ( sched == NULL )
- return -ESRCH;
+ struct cpupool *pool;
+ int rc;
if ( (op->cmd != XEN_DOMCTL_SCHEDOP_putinfo) &&
(op->cmd != XEN_DOMCTL_SCHEDOP_getinfo) )
return -EINVAL;
- return SCHED_OP(sched, adjust_global, op);
+ /* Hold the pool (i.e. cpupool_ctl_lock) across the scheduler call. */
+ pool = cpupool_get_by_id(op->cpupool_id);
+ if ( pool == NULL )
+ return -ESRCH;
+
+ /* The caller's sched_id must match the pool's actual scheduler. */
+ if ( op->sched_id != pool->sched->sched_id )
+ {
+ cpupool_put(pool);
+ return -EINVAL;
+ }
+
+ rc = SCHED_OP(pool->sched, adjust_global, op);
+
+ cpupool_put(pool);
+
+ return rc;
}
static void vcpu_periodic_timer_work(struct vcpu *v)
vcpu_unblock(v);
}
-/* Get scheduler by id */
-const struct scheduler *scheduler_get_by_id(unsigned int id)
-{
- int i;
-
- for ( i = 0; schedulers[i] != NULL; i++ )
- {
- if ( schedulers[i]->sched_id == id )
- return schedulers[i];
- }
- return NULL;
-}
-
static int cpu_schedule_up(unsigned int cpu)
{
struct schedule_data *sd = &per_cpu(schedule_data, cpu);
SCHED_OP(old_ops, free_pdata, ppriv_old, cpu);
}
-struct scheduler *scheduler_alloc(char *name)
+/* Return the statically initialised default scheduler instance (&ops). */
+struct scheduler *scheduler_get_default(void)
+{
+ return &ops;
+}
+
+struct scheduler *scheduler_alloc(unsigned int sched_id, int *perr)
{
int i;
- const struct scheduler *data;
struct scheduler *sched;
- if ( name == NULL )
- return &ops;
-
- data = &ops;
- for ( i = 0; (schedulers[i] != NULL) && (name != NULL) ; i++ )
- {
- if ( strcmp(schedulers[i]->opt_name, name) == 0 )
- {
- data = schedulers[i];
- break;
- }
- }
+ for ( i = 0; schedulers[i] != NULL; i++ )
+ if ( schedulers[i]->sched_id == sched_id )
+ goto found;
+ *perr = -ENOENT;
+ return NULL;
+ found:
+ *perr = -ENOMEM;
if ( (sched = xmalloc(struct scheduler)) == NULL )
return NULL;
- memcpy(sched, data, sizeof(*sched));
- if ( SCHED_OP(sched, init) != 0 )
+ memcpy(sched, schedulers[i], sizeof(*sched));
+ if ( (*perr = SCHED_OP(sched, init)) != 0 )
{
xfree(sched);
sched = NULL;
struct scheduler;
-struct scheduler *scheduler_alloc(char *name);
+struct scheduler *scheduler_get_default(void);
+struct scheduler *scheduler_alloc(unsigned int sched_id, int *perr);
void scheduler_free(struct scheduler *sched);
void schedule_cpu_switch(unsigned int cpu, struct cpupool *c);
void vcpu_force_reschedule(struct vcpu *v);
#define CPUPOOLID_NONE -1
-struct cpupool *cpupool_create(int poolid, char *sched);
-int cpupool_destroy(struct cpupool *c);
+struct cpupool *cpupool_get_by_id(int poolid);
+void cpupool_put(struct cpupool *pool);
int cpupool_add_domain(struct domain *d, int poolid);
void cpupool_rm_domain(struct domain *d);
int cpupool_do_sysctl(struct xen_sysctl_cpupool_op *op);